import glob
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
# Import everything needed to edit/save/watch video clips
from IPython.display import HTML
from moviepy.editor import ImageSequenceClip
from moviepy.editor import VideoFileClip
import image
from lane import Lane
# Build a Lane object for each test image.  (The stray markdown heading
# "Lane objects" from the notebook export has been removed from this line.)
test_images = glob.glob('../../Repos/CarND-Advanced-Lane-Lines/test_images/*.jpg')
# One Lane per test image; each Lane carries the per-image state
# (undistorted, binary, fitted lines, ...) used by the sections below.
lanes_list = [Lane(imgpath=imgpath) for imgpath in test_images]
# Perspective-transform setup: map a hand-tuned road trapezoid (`src`, in
# the undistorted camera image) onto a rectangle (`dst`, bird's-eye view).
img_size = cv2.imread(test_images[0]).shape[1::-1]  # (width, height)
offset_x = 300  # horizontal margin of the warped lane region, in pixels
offset_y = 0    # no vertical margin: keep the full image height
# Source trapezoid (pixel coords: top-left, top-right, bottom-right,
# bottom-left).  A duplicate assignment with slightly different values that
# was immediately overwritten has been removed.
src = np.float32([[595, 450], [685, 450], [1000, 660], [280, 660]])
dst = np.float32([[offset_x, offset_y],
                  [img_size[0] - offset_x, offset_y],
                  [img_size[0] - offset_x, img_size[1] - offset_y],
                  [offset_x, img_size[1] - offset_y]])
# M warps camera view -> top-down; Minv warps back for drawing overlays
# onto the original camera image.
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
# Sliding-window search settings for locating lane-line pixels.
window_width = 50    # convolution window width in pixels; must be even
window_height = 80   # 720 px image height / 80 = 9 vertical layers
margin_naive = 50    # slide range (px) for the blind window search
margin_prior = 50    # search range around a previous fit (was 25 at one point)
minsum = 3           # minimum convolution response to accept a centroid
# Validate with an explicit raise: a bare `assert` would be silently
# stripped when running under `python -O`.
if window_width % 2 != 0:
    raise ValueError('`window_width` must be even')
# Calibrate the camera from the chessboard images, then run a visual
# sanity check on one of the calibration frames.
calibration_images = glob.glob('../../Repos/CarND-Advanced-Lane-Lines/camera_cal/*.jpg')
nx, ny = 9, 6  # inner chessboard corners per row / per column
mtx, dist = image.calibrate_camera(calibration_images, nx, ny)
demo_lane = Lane(imgpath='../../Repos/CarND-Advanced-Lane-Lines/camera_cal/calibration8.jpg')
undistorted = demo_lane.get_undistorted(
    mtx, dist,
    outfile='../../Projects/Project_04/output_images/undistorted_calibration8.jpg')
image.show_images(demo_lane.img, 'calibration8.jpg', undistorted, 'Undistorted calibration8.jpg')
# Undistort every test image, saving each result and showing the
# before/after pair.  (Loop-body indentation restored.)
outfile = '../../Projects/Project_04/output_images/undistorted/undistorted_{0}'
for l in lanes_list:
    undistorted = l.get_undistorted(mtx, dist, outfile=outfile.format(os.path.basename(l.imgpath)))
    image.show_images(l.img, 'Original Image', undistorted, 'Undistorted Image')
# Produce a thresholded binary image for every test image.
# (Loop-body indentation restored.)
outfile = '../../Projects/Project_04/output_images/binary/binary_{0}'
for l in lanes_list:
    binary = l.get_binary(mtx, dist, outfile=outfile.format(os.path.basename(l.imgpath)))
    # Scale the 0/1 mask to 0/255 so it is visible when displayed.
    image.show_images(l.img, 'Original Image', binary*255, 'Binary Image')
# Warp each undistorted test image to the bird's-eye (top-down) view.
# (Loop-body indentation restored.)
outfile = '../../Projects/Project_04/output_images/perspective/perspective_{0}'
for l in lanes_list:
    # NOTE(review): `src` is passed as the third positional argument here,
    # while every other call passes `outfile=` in that slot — confirm
    # against Lane.get_undistorted's signature that this is intentional
    # (e.g. drawing the source trapezoid) and not a misplaced argument.
    undistorted = l.get_undistorted(mtx, dist, src)
    perspective = l.get_perspective(mtx, dist, M, dst, outfile=outfile.format(os.path.basename(l.imgpath)))
    image.show_images(undistorted, 'Undistorted Original Image', perspective, 'Perspective Image')
# Thresholded binary image in the bird's-eye view for every test image.
# (Loop-body indentation restored.)
outfile = '../../Projects/Project_04/output_images/binary_perspective/binary_perspective_{0}'
for l in lanes_list:
    # Result unused here; kept in case get_undistorted caches state on `l`
    # that get_binary_perspective relies on — TODO confirm and drop if not.
    undistorted = l.get_undistorted(mtx, dist)
    binary_perspective = l.get_binary_perspective(mtx, dist, M, outfile=outfile.format(os.path.basename(l.imgpath)))
    # Label fixed: `l.img` is the raw image (as in the loops above),
    # not the undistorted one.
    image.show_images(l.img, 'Original Image', binary_perspective*255, 'Binary Perspective Image')
# Locate lane-line window centroids via the sliding-window convolution
# search, and plot them.  (Loop-body indentation restored.)
outfile = '../../Projects/Project_04/output_images/windows/windows_{0}'
for l in lanes_list:
    window_centroids = l.find_window_centroids(mtx, dist, M, window_width, window_height, margin_naive, minsum)
    l.plot_window_centroids(mtx, dist, M, window_centroids, window_width, window_height,
                            outfile=outfile.format(os.path.basename(l.imgpath)))
# Fit lane-line polynomials and plot them in the perspective view.
# (Loop-body indentation restored.)
outfile = '../../Projects/Project_04/output_images/lines_perspective/lines_perspective_{0}'
for l in lanes_list:
    # d=2: fit a second-degree polynomial to each lane line.
    l.fit_lines(mtx, dist, M, margin_naive, margin_prior, window_width, window_height, minsum, d=2)
    l.plot_lines_perspective(mtx, dist, M, margin_naive, outfile=outfile.format(os.path.basename(l.imgpath)))
# See Section 8.
# See Section 8.
# Draw the detected lane back onto each original image (via Minv) and save.
# (Loop-body indentation restored.)
outfile = '../../Projects/Project_04/output_images/lines/lines_{0}'
for l in lanes_list:
    l.plot_lines(mtx, dist, M, Minv, show_plot=True, outfile=outfile.format(os.path.basename(l.imgpath)))
# Read the project video frame by frame and wrap each frame in a Lane,
# chaining up to `n` previous frames to stabilise detection.
clip = '../../Repos/CarND-Advanced-Lane-Lines/project_video.mp4'
# use the previous `n` frames to help detect the lane
n = 3
# https://tobilehman.com/blog/2013/01/20/extract-array-of-frames-from-mp4-using-python-opencv-bindings/
vidcap = cv2.VideoCapture(clip)
count = 0
lanes_list = []
success = True
while success:
    success, img = vidcap.read()
    if success:
        l = Lane(img)
        if count > 0:
            # Up to `n` preceding lanes, most recent frame first.
            l.prev = lanes_list[max(count - n, 0):count][::-1]
        lanes_list.append(l)
        # NOTE(review): with no cv2 window open, waitKey normally returns
        # -1, so this escape hatch is likely inert; kept for parity.
        if cv2.waitKey(10) == 27:  # exit if Escape is hit
            break
        # Indentation was lost in the export; `count` is assumed to be a
        # per-frame counter, so it is incremented inside the success branch
        # — TODO confirm against the original notebook.
        count += 1
# Release the capture handle (was previously leaked).
vidcap.release()
# Fit lane lines on every video frame, then render the annotated video.
# (Loop-body indentation restored.)
for l in lanes_list:
    l.fit_lines(mtx, dist, M, margin_naive, margin_prior, window_width, window_height, minsum, d=2)
output = '../../Projects/Project_04/project_video_lanes.mp4'
# [:, :, ::-1] reverses the channel axis — presumably OpenCV BGR frames
# converted to RGB for moviepy; confirm against Lane.plot_lines.
clip = ImageSequenceClip([l.plot_lines(mtx, dist, M, Minv)[:, :, ::-1] for l in lanes_list], fps=30)
clip.write_videofile(output)